import os
import random
import tensorboard
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras import *
from tensorflow.keras.utils import *
from sklearn.metrics import classification_report
train_path = "../dataset/train"
img_path = []
labels = []
for folder in os.listdir(train_path):
folder_path = os.path.join(train_path, folder)
for img in os.listdir(folder_path):
img_path.append(os.path.join(folder_path, img))
labels.append(folder)
df = pd.DataFrame({
"image" : img_path,
"label" : labels
})
df.count()
image 1953 label 1953 dtype: int64
df['label'].value_counts().count()
4
df['label'].value_counts()
bald_eagle 523 racoon 509 elk 462 raven 459 Name: label, dtype: int64
plt.bar(
df['label'].value_counts().keys(),
df['label'].value_counts().values,
color='c',
width=.75
)
plt.xlabel("Class", fontweight ='bold')
plt.ylabel("Number", fontweight ='bold')
plt.title("Number of Each Class in Train Folder", fontweight ='bold')
plt.show()
for name in df['label'].value_counts().keys():
rand_num = random.randint(0, len(df))
while df['label'][rand_num] != name:
rand_num = random.randint(0, len(df))
img = load_img(df['image'][rand_num])
title = df['label'][rand_num]
plt.imshow(img)
plt.title(title)
plt.show()
classes_labels = {
df['label'].value_counts().keys()[i] : i for i in range(0, df['label'].value_counts().count())
}
classes_labels
{'bald_eagle': 0, 'racoon': 1, 'elk': 2, 'raven': 3}
df['label']
0 bald_eagle
1 bald_eagle
2 bald_eagle
3 bald_eagle
4 bald_eagle
...
1948 racoon
1949 racoon
1950 racoon
1951 racoon
1952 racoon
Name: label, Length: 1953, dtype: object
for i in range(len(df)):
df['label'][i] = classes_labels[df['label'][i]]
df['label']
0 0
1 0
2 0
3 0
4 0
..
1948 1
1949 1
1950 1
1951 1
1952 1
Name: label, Length: 1953, dtype: object
one_hot = [str(to_categorical(i, df['label'].value_counts().count())) for i in df['label']]
df['label'] = one_hot
df['label']
0 [1. 0. 0. 0.]
1 [1. 0. 0. 0.]
2 [1. 0. 0. 0.]
3 [1. 0. 0. 0.]
4 [1. 0. 0. 0.]
...
1948 [0. 1. 0. 0.]
1949 [0. 1. 0. 0.]
1950 [0. 1. 0. 0.]
1951 [0. 1. 0. 0.]
1952 [0. 1. 0. 0.]
Name: label, Length: 1953, dtype: object
dataGenerator = preprocessing.image.ImageDataGenerator(
rescale = 1 / 255.0,
validation_split = 0.25,
)
df = df.sample(frac = 1)
df
| image | label | |
|---|---|---|
| 782 | ../dataset/train/raven/9b5647442c6609aadc.jpg | [0. 0. 0. 1.] |
| 1563 | ../dataset/train/racoon/94f4a55c53e0c87d75.jpg | [0. 1. 0. 0.] |
| 1116 | ../dataset/train/elk/f48c4df3e3e8d1ac62.jpg | [0. 0. 1. 0.] |
| 1573 | ../dataset/train/racoon/cbc0caef3e4180b471.jpg | [0. 1. 0. 0.] |
| 1897 | ../dataset/train/racoon/664876adcb93f05a9f.jpg | [0. 1. 0. 0.] |
| ... | ... | ... |
| 147 | ../dataset/train/bald_eagle/810258a8327f366972... | [1. 0. 0. 0.] |
| 867 | ../dataset/train/raven/d1d71923663c83b1c8.jpg | [0. 0. 0. 1.] |
| 379 | ../dataset/train/bald_eagle/ae17098e5c7ba694ed... | [1. 0. 0. 0.] |
| 1045 | ../dataset/train/elk/5affc564162ef4b10b.jpg | [0. 0. 1. 0.] |
| 526 | ../dataset/train/raven/8ba35c2203a75cde60.jpg | [0. 0. 0. 1.] |
1953 rows × 2 columns
TrainData = dataGenerator.flow_from_dataframe(
dataframe = df,
x_col = 'image',
y_col = 'label',
target_size = (128, 128),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
subset = 'training',
shuffle = False,
validate_filenames = False
)
Found 1465 non-validated image filenames belonging to 4 classes.
ValidationData = dataGenerator.flow_from_dataframe(
dataframe = df,
x_col = 'image',
y_col = 'label',
target_size = (128, 128),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
subset = 'validation',
shuffle = False,
validate_filenames = False
)
Found 488 non-validated image filenames belonging to 4 classes.
inpt = layers.Input(shape=(128, 128, 1))
output = layers.Flatten()(inpt)
output = layers.Dense(1024, activation = "relu")(output)
output = layers.Dense(512, activation = "relu")(output)
output = layers.Dense(256, activation = "relu")(output)
output = layers.Dense(4, activation = "softmax")(output)
model = models.Model(inputs = inpt, outputs = output)
model.compile(optimizer = optimizers.SGD(learning_rate = 0.01), loss = "categorical_crossentropy", metrics = ["accuracy"])
model.summary()
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 128, 128, 1)] 0
flatten_1 (Flatten) (None, 16384) 0
dense_4 (Dense) (None, 1024) 16778240
dense_5 (Dense) (None, 512) 524800
dense_6 (Dense) (None, 256) 131328
dense_7 (Dense) (None, 4) 1028
=================================================================
Total params: 17,435,396
Trainable params: 17,435,396
Non-trainable params: 0
_________________________________________________________________
history = model.fit(TrainData, validation_data = ValidationData, epochs=15)
Epoch 1/15 3/46 [>.............................] - ETA: 13s - loss: 1.6024 - accuracy: 0.3125
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 18s 387ms/step - loss: 1.3669 - accuracy: 0.3529 - val_loss: 1.3703 - val_accuracy: 0.3402 Epoch 2/15 46/46 [==============================] - 17s 371ms/step - loss: 1.2373 - accuracy: 0.4423 - val_loss: 1.2359 - val_accuracy: 0.4180 Epoch 3/15 46/46 [==============================] - 16s 354ms/step - loss: 1.1945 - accuracy: 0.4669 - val_loss: 1.1569 - val_accuracy: 0.4980 Epoch 4/15 46/46 [==============================] - 17s 368ms/step - loss: 1.1271 - accuracy: 0.4942 - val_loss: 1.1346 - val_accuracy: 0.5615 Epoch 5/15 46/46 [==============================] - 17s 378ms/step - loss: 1.0799 - accuracy: 0.5338 - val_loss: 1.1152 - val_accuracy: 0.5246 Epoch 6/15 46/46 [==============================] - 17s 370ms/step - loss: 1.0541 - accuracy: 0.5427 - val_loss: 1.1924 - val_accuracy: 0.4426 Epoch 7/15 46/46 [==============================] - 17s 374ms/step - loss: 1.0024 - accuracy: 0.5802 - val_loss: 1.0287 - val_accuracy: 0.5840 Epoch 8/15 46/46 [==============================] - 17s 375ms/step - loss: 0.9547 - accuracy: 0.6089 - val_loss: 1.1280 - val_accuracy: 0.4795 Epoch 9/15 46/46 [==============================] - 17s 373ms/step - loss: 0.9382 - accuracy: 0.6055 - val_loss: 1.0940 - val_accuracy: 0.5225 Epoch 10/15 46/46 [==============================] - 17s 379ms/step - loss: 0.9301 - accuracy: 0.6123 - val_loss: 1.2376 - val_accuracy: 0.5225 Epoch 11/15 46/46 [==============================] - 16s 351ms/step - loss: 0.8915 - accuracy: 0.6314 - val_loss: 0.9772 - val_accuracy: 0.6168 Epoch 12/15 46/46 [==============================] - 16s 346ms/step - loss: 0.8594 - accuracy: 0.6601 - val_loss: 1.2411 - val_accuracy: 0.4303 Epoch 13/15 46/46 [==============================] - 17s 369ms/step - loss: 0.8601 - accuracy: 0.6505 - val_loss: 0.9677 - val_accuracy: 0.5451 Epoch 14/15 46/46 [==============================] - 17s 360ms/step - loss: 0.8099 - accuracy: 0.6969 - val_loss: 1.0802 - val_accuracy: 0.5184 Epoch 15/15 46/46 [==============================] - 16s 354ms/step - loss: 0.8270 - accuracy: 0.6614 - val_loss: 1.0591 - val_accuracy: 0.5553
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred = model.predict(TrainData, verbose = 1)
y_pred_bool = np.argmax(y_pred, axis = 1)
46/46 [==============================] - 12s 267ms/step
y_pred
array([[0.60478586, 0.02095544, 0.18702734, 0.18723132],
[0.00701132, 0.01223822, 0.13029899, 0.8504514 ],
[0.07892513, 0.07490325, 0.78316903, 0.06300268],
...,
[0.13052201, 0.25222442, 0.4414409 , 0.1758127 ],
[0.02126547, 0.5225643 , 0.44746765, 0.00870262],
[0.26426035, 0.20491996, 0.3594339 , 0.17138588]], dtype=float32)
y_pred_bool
array([0, 3, 2, ..., 2, 1, 2])
y_pred_one_hot = [str(to_categorical(i, 4)) for i in y_pred_bool]
y_pred_one_hot
['[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 0. 1.]', '[0. 1. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', '[1. 0. 0. 0.]', '[0. 1. 0. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[0. 0. 1. 0.]', '[1. 0. 0. 0.]', ...]
print(classification_report(df['label'][0:len(y_pred_bool)], y_pred_one_hot))
precision recall f1-score support
[0. 0. 0. 1.] 0.27 0.08 0.13 339
[0. 0. 1. 0.] 0.24 0.55 0.34 359
[0. 1. 0. 0.] 0.23 0.11 0.15 369
[1. 0. 0. 0.] 0.27 0.25 0.26 398
accuracy 0.25 1465
macro avg 0.26 0.25 0.22 1465
weighted avg 0.26 0.25 0.22 1465
validation_y_pred = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool = np.argmax(validation_y_pred, axis = 1)
validation_y_pred_one_hot = [str(to_categorical(i, 4)) for i in validation_y_pred_bool]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 203ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot))
precision recall f1-score support
[0. 0. 0. 1.] 0.33 0.07 0.11 120
[0. 0. 1. 0.] 0.20 0.59 0.30 103
[0. 1. 0. 0.] 0.30 0.13 0.18 140
[1. 0. 0. 0.] 0.32 0.27 0.29 125
accuracy 0.25 488
macro avg 0.29 0.26 0.22 488
weighted avg 0.29 0.25 0.22 488
refrence : https://machinelearningmastery.com/gradient-descent-with-momentum-from-scratch/
model_2 = models.Model(inputs = inpt, outputs = output)
model_2.compile(optimizer = optimizers.SGD(learning_rate = 0.01, momentum = 0.5), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_2 = model_2.fit(TrainData, validation_data = ValidationData, epochs=15)
Epoch 1/15 5/46 [==>...........................] - ETA: 11s - loss: 0.7966 - accuracy: 0.6687
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 18s 400ms/step - loss: 0.8733 - accuracy: 0.6410 - val_loss: 1.1328 - val_accuracy: 0.4836 Epoch 2/15 46/46 [==============================] - 18s 382ms/step - loss: 0.8327 - accuracy: 0.6601 - val_loss: 0.8878 - val_accuracy: 0.6311 Epoch 3/15 46/46 [==============================] - 17s 374ms/step - loss: 0.7842 - accuracy: 0.6785 - val_loss: 1.3013 - val_accuracy: 0.5041 Epoch 4/15 46/46 [==============================] - 17s 360ms/step - loss: 0.7591 - accuracy: 0.7072 - val_loss: 0.9240 - val_accuracy: 0.6107 Epoch 5/15 46/46 [==============================] - 17s 366ms/step - loss: 0.7019 - accuracy: 0.7229 - val_loss: 0.8415 - val_accuracy: 0.6332 Epoch 6/15 46/46 [==============================] - 17s 362ms/step - loss: 0.6431 - accuracy: 0.7556 - val_loss: 1.2281 - val_accuracy: 0.4877 Epoch 7/15 46/46 [==============================] - 17s 360ms/step - loss: 0.6749 - accuracy: 0.7331 - val_loss: 1.0779 - val_accuracy: 0.5533 Epoch 8/15 46/46 [==============================] - 17s 359ms/step - loss: 0.6147 - accuracy: 0.7700 - val_loss: 0.7873 - val_accuracy: 0.7131 Epoch 9/15 46/46 [==============================] - 16s 358ms/step - loss: 0.5781 - accuracy: 0.7809 - val_loss: 0.9827 - val_accuracy: 0.6475 Epoch 10/15 46/46 [==============================] - 17s 366ms/step - loss: 0.5230 - accuracy: 0.8082 - val_loss: 0.8940 - val_accuracy: 0.6680 Epoch 11/15 46/46 [==============================] - 16s 357ms/step - loss: 0.5315 - accuracy: 0.8007 - val_loss: 0.8928 - val_accuracy: 0.6352 Epoch 12/15 46/46 [==============================] - 17s 364ms/step - loss: 0.5252 - accuracy: 0.8116 - val_loss: 1.5992 - val_accuracy: 0.4775 Epoch 13/15 46/46 [==============================] - 17s 364ms/step - loss: 0.5586 - accuracy: 0.7966 - val_loss: 0.7587 - val_accuracy: 0.7418 Epoch 14/15 46/46 [==============================] - 17s 361ms/step - loss: 0.4831 - accuracy: 0.8205 - val_loss: 0.8537 - val_accuracy: 0.6926 Epoch 15/15 46/46 [==============================] - 17s 362ms/step - loss: 0.4230 - accuracy: 0.8539 - val_loss: 0.8240 - val_accuracy: 0.7295
plt.plot(history_2.history['loss'])
plt.plot(history_2.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_2.history['accuracy'])
plt.plot(history_2.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_2 = model_2.predict(TrainData, verbose = 1)
y_pred_bool_2 = np.argmax(y_pred_2, axis = 1)
y_pred_one_hot_2 = [str(to_categorical(i, 4)) for i in y_pred_bool_2]
46/46 [==============================] - 12s 261ms/step
print(classification_report(df['label'][0:len(y_pred_bool_2)], y_pred_one_hot_2))
precision recall f1-score support
[0. 0. 0. 1.] 0.23 0.29 0.26 339
[0. 0. 1. 0.] 0.24 0.27 0.25 359
[0. 1. 0. 0.] 0.26 0.23 0.25 369
[1. 0. 0. 0.] 0.28 0.21 0.24 398
accuracy 0.25 1465
macro avg 0.25 0.25 0.25 1465
weighted avg 0.25 0.25 0.25 1465
validation_y_pred_2 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_2 = np.argmax(validation_y_pred_2, axis = 1)
validation_y_pred_one_hot_2 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_2]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 203ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_2))
precision recall f1-score support
[0. 0. 0. 1.] 0.34 0.32 0.33 120
[0. 0. 1. 0.] 0.24 0.39 0.30 103
[0. 1. 0. 0.] 0.28 0.24 0.25 140
[1. 0. 0. 0.] 0.36 0.26 0.30 125
accuracy 0.29 488
macro avg 0.30 0.30 0.29 488
weighted avg 0.31 0.29 0.29 488
model_3 = models.Model(inputs = inpt, outputs = output)
model_3.compile(optimizer = optimizers.SGD(learning_rate = 0.01, momentum = 0.9), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_3 = model_3.fit(TrainData, validation_data = ValidationData, epochs=15)
Epoch 1/15 3/46 [>.............................] - ETA: 13s - loss: 0.2708 - accuracy: 0.9167
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 17s 377ms/step - loss: 1.4084 - accuracy: 0.4710 - val_loss: 1.2332 - val_accuracy: 0.4447 Epoch 2/15 46/46 [==============================] - 17s 371ms/step - loss: 1.1177 - accuracy: 0.5099 - val_loss: 1.1689 - val_accuracy: 0.4734 Epoch 3/15 46/46 [==============================] - 18s 381ms/step - loss: 0.9751 - accuracy: 0.5741 - val_loss: 1.1080 - val_accuracy: 0.5123 Epoch 4/15 46/46 [==============================] - 17s 365ms/step - loss: 0.9554 - accuracy: 0.5836 - val_loss: 1.5475 - val_accuracy: 0.4344 Epoch 5/15 46/46 [==============================] - 17s 361ms/step - loss: 0.9468 - accuracy: 0.5891 - val_loss: 1.2580 - val_accuracy: 0.4795 Epoch 6/15 46/46 [==============================] - 17s 365ms/step - loss: 0.9450 - accuracy: 0.6055 - val_loss: 1.1874 - val_accuracy: 0.5369 Epoch 7/15 46/46 [==============================] - 17s 361ms/step - loss: 0.8996 - accuracy: 0.6273 - val_loss: 1.0990 - val_accuracy: 0.5492 Epoch 8/15 46/46 [==============================] - 17s 368ms/step - loss: 0.9355 - accuracy: 0.5986 - val_loss: 1.0285 - val_accuracy: 0.5799 Epoch 9/15 46/46 [==============================] - 16s 357ms/step - loss: 0.8163 - accuracy: 0.6526 - val_loss: 1.1574 - val_accuracy: 0.5184 Epoch 10/15 46/46 [==============================] - 17s 364ms/step - loss: 0.7749 - accuracy: 0.6915 - val_loss: 1.2444 - val_accuracy: 0.5328 Epoch 11/15 46/46 [==============================] - 17s 357ms/step - loss: 0.8630 - accuracy: 0.6341 - val_loss: 0.9867 - val_accuracy: 0.6066 Epoch 12/15 46/46 [==============================] - 17s 362ms/step - loss: 0.7368 - accuracy: 0.6976 - val_loss: 1.3116 - val_accuracy: 0.5574 Epoch 13/15 46/46 [==============================] - 16s 359ms/step - loss: 0.7998 - accuracy: 0.6662 - val_loss: 1.1024 - val_accuracy: 0.5020 Epoch 14/15 46/46 [==============================] - 16s 362ms/step - loss: 0.7855 - accuracy: 0.6771 - val_loss: 1.1969 - val_accuracy: 0.5102 Epoch 15/15 46/46 [==============================] - 16s 359ms/step - loss: 0.7517 - accuracy: 0.6901 - val_loss: 1.3193 - val_accuracy: 0.4652
plt.plot(history_3.history['loss'])
plt.plot(history_3.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_3.history['accuracy'])
plt.plot(history_3.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_3 = model_3.predict(TrainData, verbose = 1)
y_pred_bool_3 = np.argmax(y_pred_3, axis = 1)
y_pred_one_hot_3 = [str(to_categorical(i, 4)) for i in y_pred_bool_3]
46/46 [==============================] - 12s 262ms/step
print(classification_report(df['label'][0:len(y_pred_bool_3)], y_pred_one_hot_3))
precision recall f1-score support
[0. 0. 0. 1.] 0.24 0.15 0.19 339
[0. 0. 1. 0.] 0.33 0.07 0.12 359
[0. 1. 0. 0.] 0.30 0.36 0.33 369
[1. 0. 0. 0.] 0.26 0.47 0.34 398
accuracy 0.27 1465
macro avg 0.28 0.27 0.24 1465
weighted avg 0.28 0.27 0.25 1465
validation_y_pred_3 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_3 = np.argmax(validation_y_pred_3, axis = 1)
validation_y_pred_one_hot_3 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_3]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 201ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_3))
precision recall f1-score support
[0. 0. 0. 1.] 0.27 0.17 0.21 120
[0. 0. 1. 0.] 0.18 0.03 0.05 103
[0. 1. 0. 0.] 0.29 0.34 0.31 140
[1. 0. 0. 0.] 0.28 0.50 0.36 125
accuracy 0.27 488
macro avg 0.25 0.26 0.23 488
weighted avg 0.26 0.27 0.24 488
model_4 = models.Model(inputs = inpt, outputs = output)
model_4.compile(optimizer = optimizers.Adam(), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_4 = model_4.fit(TrainData, validation_data = ValidationData, epochs=10)
Epoch 1/10 3/46 [>.............................] - ETA: 10s - loss: 7.0117 - accuracy: 0.2708
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 18s 382ms/step - loss: 1.7443 - accuracy: 0.4041 - val_loss: 1.3343 - val_accuracy: 0.4590 Epoch 2/10 46/46 [==============================] - 17s 365ms/step - loss: 1.2608 - accuracy: 0.4689 - val_loss: 1.0867 - val_accuracy: 0.5348 Epoch 3/10 46/46 [==============================] - 17s 369ms/step - loss: 1.0450 - accuracy: 0.5386 - val_loss: 1.0528 - val_accuracy: 0.5430 Epoch 4/10 46/46 [==============================] - 17s 366ms/step - loss: 0.9239 - accuracy: 0.6171 - val_loss: 1.0075 - val_accuracy: 0.5533 Epoch 5/10 46/46 [==============================] - 17s 369ms/step - loss: 0.9367 - accuracy: 0.6143 - val_loss: 1.0717 - val_accuracy: 0.5574 Epoch 6/10 46/46 [==============================] - 17s 378ms/step - loss: 0.7805 - accuracy: 0.6737 - val_loss: 0.9418 - val_accuracy: 0.6086 Epoch 7/10 46/46 [==============================] - 17s 374ms/step - loss: 0.8106 - accuracy: 0.6778 - val_loss: 1.0000 - val_accuracy: 0.5963 Epoch 8/10 46/46 [==============================] - 17s 376ms/step - loss: 0.8314 - accuracy: 0.6710 - val_loss: 1.0246 - val_accuracy: 0.5512 Epoch 9/10 46/46 [==============================] - 17s 376ms/step - loss: 0.6724 - accuracy: 0.7509 - val_loss: 1.1748 - val_accuracy: 0.5799 Epoch 10/10 46/46 [==============================] - 17s 373ms/step - loss: 0.6898 - accuracy: 0.7215 - val_loss: 1.1755 - val_accuracy: 0.5574
plt.plot(history_4.history['loss'])
plt.plot(history_4.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_4.history['accuracy'])
plt.plot(history_4.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_4 = model_4.predict(TrainData, verbose = 1)
y_pred_bool_4 = np.argmax(y_pred_4, axis = 1)
y_pred_one_hot_4 = [str(to_categorical(i, 4)) for i in y_pred_bool_4]
46/46 [==============================] - 12s 268ms/step
print(classification_report(df['label'][0:len(y_pred_bool_4)], y_pred_one_hot_4))
precision recall f1-score support
[0. 0. 0. 1.] 0.23 0.26 0.25 339
[0. 0. 1. 0.] 0.26 0.33 0.29 359
[0. 1. 0. 0.] 0.27 0.35 0.31 369
[1. 0. 0. 0.] 0.27 0.10 0.15 398
accuracy 0.26 1465
macro avg 0.26 0.26 0.25 1465
weighted avg 0.26 0.26 0.25 1465
validation_y_pred_4 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_4 = np.argmax(validation_y_pred_4, axis = 1)
validation_y_pred_one_hot_4 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_4]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 204ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_4))
precision recall f1-score support
[0. 0. 0. 1.] 0.32 0.26 0.29 120
[0. 0. 1. 0.] 0.22 0.36 0.27 103
[0. 1. 0. 0.] 0.28 0.39 0.33 140
[1. 0. 0. 0.] 0.44 0.12 0.19 125
accuracy 0.28 488
macro avg 0.32 0.28 0.27 488
weighted avg 0.32 0.28 0.27 488
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_4.history['accuracy'])
plt.plot(history_4.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
print(classification_report(df['label'][0:len(y_pred_bool)], y_pred_one_hot))
precision recall f1-score support
[0. 0. 0. 1.] 0.27 0.08 0.13 339
[0. 0. 1. 0.] 0.24 0.55 0.34 359
[0. 1. 0. 0.] 0.23 0.11 0.15 369
[1. 0. 0. 0.] 0.27 0.25 0.26 398
accuracy 0.25 1465
macro avg 0.26 0.25 0.22 1465
weighted avg 0.26 0.25 0.22 1465
print(classification_report(df['label'][0:len(y_pred_bool_4)], y_pred_one_hot_4))
precision recall f1-score support
[0. 0. 0. 1.] 0.23 0.26 0.25 339
[0. 0. 1. 0.] 0.26 0.33 0.29 359
[0. 1. 0. 0.] 0.27 0.35 0.31 369
[1. 0. 0. 0.] 0.27 0.10 0.15 398
accuracy 0.26 1465
macro avg 0.26 0.26 0.25 1465
weighted avg 0.26 0.26 0.25 1465
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot))
precision recall f1-score support
[0. 0. 0. 1.] 0.33 0.07 0.11 120
[0. 0. 1. 0.] 0.20 0.59 0.30 103
[0. 1. 0. 0.] 0.30 0.13 0.18 140
[1. 0. 0. 0.] 0.32 0.27 0.29 125
accuracy 0.25 488
macro avg 0.29 0.26 0.22 488
weighted avg 0.29 0.25 0.22 488
print(classification_report(df['label'][len(y_pred_bool_4):], validation_y_pred_one_hot_4))
precision recall f1-score support
[0. 0. 0. 1.] 0.32 0.26 0.29 120
[0. 0. 1. 0.] 0.22 0.36 0.27 103
[0. 1. 0. 0.] 0.28 0.39 0.33 140
[1. 0. 0. 0.] 0.44 0.12 0.19 125
accuracy 0.28 488
macro avg 0.32 0.28 0.27 488
weighted avg 0.32 0.28 0.27 488
model_5 = models.Model(inputs = inpt, outputs = output)
model_5.compile(optimizer = optimizers.Adam(), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_5 = model_5.fit(TrainData, validation_data = ValidationData, epochs=20)
Epoch 1/20 1/46 [..............................] - ETA: 26s - loss: 0.8725 - accuracy: 0.6875
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 18s 397ms/step - loss: 1.0266 - accuracy: 0.6266 - val_loss: 1.1167 - val_accuracy: 0.5451 Epoch 2/20 46/46 [==============================] - 18s 392ms/step - loss: 0.6886 - accuracy: 0.7331 - val_loss: 0.9228 - val_accuracy: 0.6537 Epoch 3/20 46/46 [==============================] - 18s 397ms/step - loss: 0.5515 - accuracy: 0.7850 - val_loss: 1.1316 - val_accuracy: 0.5820 Epoch 4/20 46/46 [==============================] - 18s 394ms/step - loss: 0.5785 - accuracy: 0.7741 - val_loss: 0.8841 - val_accuracy: 0.6844 Epoch 5/20 46/46 [==============================] - 17s 374ms/step - loss: 0.5434 - accuracy: 0.7939 - val_loss: 0.8894 - val_accuracy: 0.7131 Epoch 6/20 46/46 [==============================] - 17s 371ms/step - loss: 0.4126 - accuracy: 0.8410 - val_loss: 0.9919 - val_accuracy: 0.7049 Epoch 7/20 46/46 [==============================] - 17s 365ms/step - loss: 0.5296 - accuracy: 0.7925 - val_loss: 1.3199 - val_accuracy: 0.5512 Epoch 8/20 46/46 [==============================] - 17s 366ms/step - loss: 0.4023 - accuracy: 0.8491 - val_loss: 1.6016 - val_accuracy: 0.6045 Epoch 9/20 46/46 [==============================] - 17s 370ms/step - loss: 0.3733 - accuracy: 0.8539 - val_loss: 1.8940 - val_accuracy: 0.5164 Epoch 10/20 46/46 [==============================] - 17s 368ms/step - loss: 0.4094 - accuracy: 0.8655 - val_loss: 1.8174 - val_accuracy: 0.5389 Epoch 11/20 46/46 [==============================] - 17s 371ms/step - loss: 0.4193 - accuracy: 0.8375 - val_loss: 1.6239 - val_accuracy: 0.5902 Epoch 12/20 46/46 [==============================] - 17s 372ms/step - loss: 0.5292 - accuracy: 0.8020 - val_loss: 0.8256 - val_accuracy: 0.7520 Epoch 13/20 46/46 [==============================] - 17s 372ms/step - loss: 0.3112 - accuracy: 0.8887 - val_loss: 2.0452 - val_accuracy: 0.5266 Epoch 14/20 46/46 [==============================] - 17s 368ms/step - loss: 0.3219 - accuracy: 0.8901 - val_loss: 0.8986 - val_accuracy: 0.7172 Epoch 15/20 46/46 [==============================] - 17s 369ms/step - loss: 0.2694 - accuracy: 0.9065 - val_loss: 0.9925 - val_accuracy: 0.6803 Epoch 16/20 46/46 [==============================] - 17s 370ms/step - loss: 0.4165 - accuracy: 0.8512 - val_loss: 0.8424 - val_accuracy: 0.7275 Epoch 17/20 46/46 [==============================] - 17s 369ms/step - loss: 0.3706 - accuracy: 0.8696 - val_loss: 0.9739 - val_accuracy: 0.7070 Epoch 18/20 46/46 [==============================] - 17s 368ms/step - loss: 0.2532 - accuracy: 0.9106 - val_loss: 1.4137 - val_accuracy: 0.6475 Epoch 19/20 46/46 [==============================] - 18s 381ms/step - loss: 0.2298 - accuracy: 0.9283 - val_loss: 1.2561 - val_accuracy: 0.6742 Epoch 20/20 46/46 [==============================] - 18s 380ms/step - loss: 0.2559 - accuracy: 0.9106 - val_loss: 1.2596 - val_accuracy: 0.7295
plt.plot(history_5.history['loss'])
plt.plot(history_5.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_5.history['accuracy'])
plt.plot(history_5.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_5 = model_5.predict(TrainData, verbose = 1)
y_pred_bool_5 = np.argmax(y_pred_5, axis = 1)
y_pred_one_hot_5 = [str(to_categorical(i, 4)) for i in y_pred_bool_5]
46/46 [==============================] - 13s 282ms/step
print(classification_report(df['label'][0:len(y_pred_bool_5)], y_pred_one_hot_5))
precision recall f1-score support
[0. 0. 0. 1.] 0.22 0.22 0.22 339
[0. 0. 1. 0.] 0.24 0.25 0.25 359
[0. 1. 0. 0.] 0.28 0.32 0.30 369
[1. 0. 0. 0.] 0.28 0.24 0.26 398
accuracy 0.26 1465
macro avg 0.26 0.26 0.26 1465
weighted avg 0.26 0.26 0.26 1465
validation_y_pred_5 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_5 = np.argmax(validation_y_pred_5, axis = 1)
validation_y_pred_one_hot_5 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_5]
7/16 [============>.................] - ETA: 2s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 207ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_5))
precision recall f1-score support
[0. 0. 0. 1.] 0.31 0.25 0.28 120
[0. 0. 1. 0.] 0.21 0.26 0.23 103
[0. 1. 0. 0.] 0.28 0.35 0.31 140
[1. 0. 0. 0.] 0.33 0.23 0.27 125
accuracy 0.28 488
macro avg 0.28 0.27 0.27 488
weighted avg 0.29 0.28 0.28 488
model_6 = models.Model(inputs = inpt, outputs = output)
model_6.compile(optimizer = optimizers.Adam(), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_6 = model_6.fit(TrainData, validation_data = ValidationData, epochs=50)
Epoch 1/50 5/46 [==>...........................] - ETA: 12s - loss: 1.8425 - accuracy: 0.7375
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 19s 405ms/step - loss: 0.5572 - accuracy: 0.8457 - val_loss: 0.9106 - val_accuracy: 0.7684 Epoch 2/50 46/46 [==============================] - 18s 389ms/step - loss: 0.2257 - accuracy: 0.9229 - val_loss: 1.1586 - val_accuracy: 0.7213 Epoch 3/50 46/46 [==============================] - 17s 382ms/step - loss: 0.3079 - accuracy: 0.8956 - val_loss: 1.0023 - val_accuracy: 0.7541 Epoch 4/50 46/46 [==============================] - 18s 385ms/step - loss: 0.3365 - accuracy: 0.8962 - val_loss: 1.1168 - val_accuracy: 0.7111 Epoch 5/50 46/46 [==============================] - 17s 379ms/step - loss: 0.2155 - accuracy: 0.9304 - val_loss: 0.9796 - val_accuracy: 0.7582 Epoch 6/50 46/46 [==============================] - 17s 373ms/step - loss: 0.0993 - accuracy: 0.9618 - val_loss: 1.0908 - val_accuracy: 0.7643 Epoch 7/50 46/46 [==============================] - 17s 368ms/step - loss: 0.1523 - accuracy: 0.9468 - val_loss: 1.0207 - val_accuracy: 0.7398 Epoch 8/50 46/46 [==============================] - 17s 370ms/step - loss: 0.1851 - accuracy: 0.9304 - val_loss: 1.2807 - val_accuracy: 0.7234 Epoch 9/50 46/46 [==============================] - 17s 370ms/step - loss: 0.1666 - accuracy: 0.9420 - val_loss: 1.7639 - val_accuracy: 0.6537 Epoch 10/50 46/46 [==============================] - 17s 369ms/step - loss: 0.3605 - accuracy: 0.8826 - val_loss: 1.0052 - val_accuracy: 0.7643 Epoch 11/50 46/46 [==============================] - 17s 372ms/step - loss: 0.1454 - accuracy: 0.9515 - val_loss: 1.2251 - val_accuracy: 0.6926 Epoch 12/50 46/46 [==============================] - 17s 372ms/step - loss: 0.1745 - accuracy: 0.9358 - val_loss: 1.3564 - val_accuracy: 0.6803 Epoch 13/50 46/46 [==============================] - 17s 372ms/step - loss: 0.1536 - accuracy: 0.9468 - val_loss: 0.9946 - val_accuracy: 0.7746 Epoch 14/50 46/46 [==============================] - 17s 368ms/step - loss: 0.3096 - accuracy: 0.8860 - val_loss: 1.1331 - val_accuracy: 0.6721 Epoch 15/50 46/46 [==============================] - 17s 372ms/step - loss: 0.1851 - accuracy: 0.9392 - val_loss: 1.1177 - val_accuracy: 0.7193 Epoch 16/50 46/46 [==============================] - 17s 369ms/step - loss: 0.2212 - accuracy: 0.9208 - val_loss: 1.1315 - val_accuracy: 0.7213 Epoch 17/50 46/46 [==============================] - 17s 367ms/step - loss: 0.3396 - accuracy: 0.8846 - val_loss: 1.0058 - val_accuracy: 0.7623 Epoch 18/50 46/46 [==============================] - 17s 368ms/step - loss: 0.1402 - accuracy: 0.9618 - val_loss: 1.5753 - val_accuracy: 0.6742 Epoch 19/50 46/46 [==============================] - 17s 371ms/step - loss: 0.0672 - accuracy: 0.9802 - val_loss: 1.2665 - val_accuracy: 0.7889 Epoch 20/50 46/46 [==============================] - 17s 372ms/step - loss: 0.2330 - accuracy: 0.9290 - val_loss: 2.2964 - val_accuracy: 0.5533 Epoch 21/50 46/46 [==============================] - 17s 368ms/step - loss: 0.2927 - accuracy: 0.8949 - val_loss: 1.1679 - val_accuracy: 0.7213 Epoch 22/50 46/46 [==============================] - 17s 376ms/step - loss: 0.0870 - accuracy: 0.9768 - val_loss: 1.0295 - val_accuracy: 0.7746 Epoch 23/50 46/46 [==============================] - 17s 371ms/step - loss: 0.0382 - accuracy: 0.9870 - val_loss: 1.3858 - val_accuracy: 0.7664 Epoch 24/50 46/46 [==============================] - 17s 371ms/step - loss: 0.0423 - accuracy: 0.9898 - val_loss: 1.4582 - val_accuracy: 0.7725 Epoch 25/50 46/46 [==============================] - 17s 367ms/step - loss: 0.4992 - accuracy: 0.8389 - val_loss: 1.3846 - val_accuracy: 0.5451 Epoch 26/50 46/46 [==============================] - 17s 368ms/step - loss: 0.3211 - accuracy: 0.8942 - val_loss: 1.0399 - val_accuracy: 0.7684 Epoch 27/50 46/46 [==============================] - 17s 373ms/step - loss: 0.1166 - accuracy: 0.9645 - val_loss: 1.1608 - val_accuracy: 0.7766 Epoch 28/50 46/46 [==============================] - 17s 368ms/step - loss: 0.1381 - accuracy: 0.9502 - val_loss: 1.3562 - val_accuracy: 0.7500 Epoch 29/50 46/46 [==============================] - 17s 369ms/step - loss: 0.1961 - accuracy: 0.9304 - val_loss: 1.2118 - val_accuracy: 0.7459 Epoch 30/50 46/46 [==============================] - 17s 373ms/step - loss: 0.3857 - accuracy: 0.8689 - val_loss: 1.0301 - val_accuracy: 0.7869 Epoch 31/50 46/46 [==============================] - 17s 372ms/step - loss: 0.0845 - accuracy: 0.9747 - val_loss: 1.0813 - val_accuracy: 0.7725 Epoch 32/50 46/46 [==============================] - 17s 370ms/step - loss: 0.1915 - accuracy: 0.9536 - val_loss: 2.0390 - val_accuracy: 0.5738 Epoch 33/50 46/46 [==============================] - 17s 369ms/step - loss: 0.5995 - accuracy: 0.8109 - val_loss: 0.9200 - val_accuracy: 0.6783 Epoch 34/50 46/46 [==============================] - 17s 369ms/step - loss: 0.2847 - accuracy: 0.9010 - val_loss: 0.9689 - val_accuracy: 0.7275 Epoch 35/50 46/46 [==============================] - 17s 369ms/step - loss: 0.1528 - accuracy: 0.9522 - val_loss: 1.0519 - val_accuracy: 0.7602 Epoch 36/50 46/46 [==============================] - 17s 365ms/step - loss: 0.0664 - accuracy: 0.9795 - val_loss: 1.1424 - val_accuracy: 0.7602 Epoch 37/50 46/46 [==============================] - 17s 373ms/step - loss: 0.1274 - accuracy: 0.9590 - val_loss: 1.4993 - val_accuracy: 0.7357 Epoch 38/50 46/46 [==============================] - 17s 369ms/step - loss: 0.0583 - accuracy: 0.9802 - val_loss: 1.0762 - val_accuracy: 0.7992 Epoch 39/50 46/46 [==============================] - 17s 370ms/step - loss: 0.0127 - accuracy: 0.9973 - val_loss: 1.1720 - val_accuracy: 0.7951 Epoch 40/50 46/46 [==============================] - 17s 370ms/step - loss: 0.0101 - accuracy: 0.9986 - val_loss: 1.2718 - val_accuracy: 0.7971 Epoch 41/50 46/46 [==============================] - 17s 373ms/step - loss: 0.0281 - accuracy: 0.9918 - val_loss: 1.3537 - val_accuracy: 0.7520 Epoch 42/50 46/46 [==============================] - 17s 371ms/step - loss: 0.2960 - accuracy: 0.9201 - val_loss: 1.0445 - val_accuracy: 0.7561 Epoch 43/50 46/46 [==============================] - 18s 382ms/step - loss: 0.1484 - accuracy: 0.9468 - val_loss: 1.0553 - val_accuracy: 0.7643 Epoch 44/50 46/46 [==============================] - 17s 380ms/step - loss: 0.0255 - accuracy: 0.9945 - val_loss: 1.1768 - val_accuracy: 0.7807 Epoch 45/50 46/46 [==============================] - 17s 372ms/step - loss: 0.0225 - accuracy: 0.9932 - val_loss: 1.3883 - val_accuracy: 0.7705 Epoch 46/50 46/46 [==============================] - 18s 395ms/step - loss: 0.0201 - accuracy: 0.9939 - val_loss: 1.3144 - val_accuracy: 0.7684 Epoch 47/50 46/46 [==============================] - 18s 390ms/step - loss: 0.0889 - accuracy: 0.9693 - val_loss: 1.5723 - val_accuracy: 0.7520 Epoch 48/50 46/46 [==============================] - 17s 372ms/step - loss: 0.2038 - accuracy: 0.9290 - val_loss: 1.2035 - val_accuracy: 0.7828 Epoch 49/50 46/46 [==============================] - 17s 371ms/step - loss: 0.0410 - accuracy: 0.9870 - val_loss: 1.2704 - val_accuracy: 0.7807 Epoch 50/50 46/46 [==============================] - 17s 375ms/step - loss: 0.0115 - accuracy: 0.9973 - val_loss: 1.3596 - val_accuracy: 0.7807
plt.plot(history_6.history['loss'])
plt.plot(history_6.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_6.history['accuracy'])
plt.plot(history_6.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_6 = model_6.predict(TrainData, verbose = 1)
y_pred_bool_6 = np.argmax(y_pred_6, axis = 1)
y_pred_one_hot_6 = [str(to_categorical(i, 4)) for i in y_pred_bool_6]
46/46 [==============================] - 12s 267ms/step
print(classification_report(df['label'][0:len(y_pred_bool_6)], y_pred_one_hot_6))
precision recall f1-score support
[0. 0. 0. 1.] 0.21 0.25 0.23 339
[0. 0. 1. 0.] 0.24 0.25 0.24 359
[0. 1. 0. 0.] 0.25 0.23 0.24 369
[1. 0. 0. 0.] 0.27 0.23 0.25 398
accuracy 0.24 1465
macro avg 0.24 0.24 0.24 1465
weighted avg 0.24 0.24 0.24 1465
validation_y_pred_6 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_6 = np.argmax(validation_y_pred_6, axis = 1)
validation_y_pred_one_hot_6 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_6]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 215ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_6))
precision recall f1-score support
[0. 0. 0. 1.] 0.28 0.32 0.30 120
[0. 0. 1. 0.] 0.22 0.27 0.24 103
[0. 1. 0. 0.] 0.28 0.25 0.27 140
[1. 0. 0. 0.] 0.32 0.26 0.28 125
accuracy 0.27 488
macro avg 0.27 0.27 0.27 488
weighted avg 0.28 0.27 0.27 488
es = callbacks.EarlyStopping(monitor='accuracy', mode = 'max', verbose = 1)
model_7 = models.Model(inputs = inpt, outputs = output)
model_7.compile(optimizer = optimizers.Adam(), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_7 = model_7.fit(TrainData, validation_data = ValidationData, epochs=100, callbacks = [es])
Epoch 1/100 1/46 [..............................] - ETA: 22s - loss: 4.5767e-04 - accuracy: 1.0000
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 18s 395ms/step - loss: 0.2934 - accuracy: 0.9195 - val_loss: 1.4317 - val_accuracy: 0.7275 Epoch 2/100 46/46 [==============================] - 17s 384ms/step - loss: 0.4245 - accuracy: 0.8778 - val_loss: 1.2976 - val_accuracy: 0.7336 Epoch 00002: early stopping
plt.plot(history_7.history['loss'])
plt.plot(history_7.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_7.history['accuracy'])
plt.plot(history_7.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_7 = model_7.predict(TrainData, verbose = 1)
y_pred_bool_7 = np.argmax(y_pred_7, axis = 1)
y_pred_one_hot_7 = [str(to_categorical(i, 4)) for i in y_pred_bool_7]
46/46 [==============================] - 12s 269ms/step
print(classification_report(df['label'][0:len(y_pred_bool_7)], y_pred_one_hot_7))
precision recall f1-score support
[0. 0. 0. 1.] 0.22 0.24 0.23 339
[0. 0. 1. 0.] 0.23 0.28 0.25 359
[0. 1. 0. 0.] 0.25 0.22 0.23 369
[1. 0. 0. 0.] 0.27 0.22 0.25 398
accuracy 0.24 1465
macro avg 0.24 0.24 0.24 1465
weighted avg 0.24 0.24 0.24 1465
validation_y_pred_7 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_7 = np.argmax(validation_y_pred_7, axis = 1)
validation_y_pred_one_hot_7 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_7]
7/16 [============>.................] - ETA: 2s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 219ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_7))
precision recall f1-score support
[0. 0. 0. 1.] 0.30 0.28 0.29 120
[0. 0. 1. 0.] 0.21 0.37 0.27 103
[0. 1. 0. 0.] 0.28 0.21 0.24 140
[1. 0. 0. 0.] 0.36 0.26 0.31 125
accuracy 0.27 488
macro avg 0.29 0.28 0.28 488
weighted avg 0.29 0.27 0.28 488
model_8 = models.Model(inputs = inpt, outputs = output)
model_8.compile(optimizer = optimizers.Adam(), loss = "mean_squared_error", metrics = ["accuracy"])
history_8 = model_8.fit(TrainData, validation_data = ValidationData, epochs=10)
Epoch 1/10 7/46 [===>..........................] - ETA: 11s - loss: 0.0448 - accuracy: 0.8795
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 18s 391ms/step - loss: 0.0262 - accuracy: 0.9324 - val_loss: 0.0939 - val_accuracy: 0.7705 Epoch 2/10 46/46 [==============================] - 17s 369ms/step - loss: 0.0039 - accuracy: 0.9918 - val_loss: 0.0961 - val_accuracy: 0.7705 Epoch 3/10 46/46 [==============================] - 17s 371ms/step - loss: 0.0078 - accuracy: 0.9802 - val_loss: 0.0966 - val_accuracy: 0.7725 Epoch 4/10 46/46 [==============================] - 18s 383ms/step - loss: 0.0225 - accuracy: 0.9433 - val_loss: 0.2080 - val_accuracy: 0.5430 Epoch 5/10 46/46 [==============================] - 18s 387ms/step - loss: 0.0646 - accuracy: 0.8410 - val_loss: 0.1569 - val_accuracy: 0.6332 Epoch 6/10 46/46 [==============================] - 17s 370ms/step - loss: 0.0189 - accuracy: 0.9509 - val_loss: 0.0906 - val_accuracy: 0.7951 Epoch 7/10 46/46 [==============================] - 18s 386ms/step - loss: 0.0041 - accuracy: 0.9904 - val_loss: 0.1106 - val_accuracy: 0.7377 Epoch 8/10 46/46 [==============================] - 19s 407ms/step - loss: 0.0382 - accuracy: 0.9078 - val_loss: 0.1561 - val_accuracy: 0.6537 Epoch 9/10 46/46 [==============================] - 17s 382ms/step - loss: 0.0591 - accuracy: 0.8546 - val_loss: 0.1404 - val_accuracy: 0.6557 Epoch 10/10 46/46 [==============================] - 18s 390ms/step - loss: 0.0369 - accuracy: 0.9051 - val_loss: 0.1063 - val_accuracy: 0.7500
plt.plot(history_8.history['loss'])
plt.plot(history_8.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_8.history['accuracy'])
plt.plot(history_8.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_8 = model_8.predict(TrainData, verbose = 1)
y_pred_bool_8 = np.argmax(y_pred_8, axis = 1)
y_pred_one_hot_8 = [str(to_categorical(i, 4)) for i in y_pred_bool_8]
46/46 [==============================] - 13s 288ms/step
print(classification_report(df['label'][0:len(y_pred_bool_8)], y_pred_one_hot_8))
precision recall f1-score support
[0. 0. 0. 1.] 0.22 0.24 0.23 339
[0. 0. 1. 0.] 0.24 0.25 0.24 359
[0. 1. 0. 0.] 0.27 0.27 0.27 369
[1. 0. 0. 0.] 0.26 0.22 0.24 398
accuracy 0.25 1465
macro avg 0.25 0.25 0.24 1465
weighted avg 0.25 0.25 0.25 1465
validation_y_pred_8 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_8 = np.argmax(validation_y_pred_8, axis = 1)
validation_y_pred_one_hot_8 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_8]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 214ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_8))
precision recall f1-score support
[0. 0. 0. 1.] 0.29 0.27 0.28 120
[0. 0. 1. 0.] 0.21 0.25 0.23 103
[0. 1. 0. 0.] 0.28 0.30 0.29 140
[1. 0. 0. 0.] 0.36 0.30 0.33 125
accuracy 0.28 488
macro avg 0.29 0.28 0.28 488
weighted avg 0.29 0.28 0.28 488
plt.plot(history_8.history['accuracy'])
plt.plot(history_8.history['val_accuracy'])
plt.title('MSE model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_4.history['accuracy'])
plt.plot(history_4.history['val_accuracy'])
plt.title('CCE model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
print(classification_report(df['label'][0:len(y_pred_bool_8)], y_pred_one_hot_8))
precision recall f1-score support
[0. 0. 0. 1.] 0.22 0.24 0.23 339
[0. 0. 1. 0.] 0.24 0.25 0.24 359
[0. 1. 0. 0.] 0.27 0.27 0.27 369
[1. 0. 0. 0.] 0.26 0.22 0.24 398
accuracy 0.25 1465
macro avg 0.25 0.25 0.24 1465
weighted avg 0.25 0.25 0.25 1465
print(classification_report(df['label'][0:len(y_pred_bool_4)], y_pred_one_hot_4))
precision recall f1-score support
[0. 0. 0. 1.] 0.23 0.26 0.25 339
[0. 0. 1. 0.] 0.26 0.33 0.29 359
[0. 1. 0. 0.] 0.27 0.35 0.31 369
[1. 0. 0. 0.] 0.27 0.10 0.15 398
accuracy 0.26 1465
macro avg 0.26 0.26 0.25 1465
weighted avg 0.26 0.26 0.25 1465
print(classification_report(df['label'][len(y_pred_bool_8):], validation_y_pred_one_hot_8))
precision recall f1-score support
[0. 0. 0. 1.] 0.29 0.27 0.28 120
[0. 0. 1. 0.] 0.21 0.25 0.23 103
[0. 1. 0. 0.] 0.28 0.30 0.29 140
[1. 0. 0. 0.] 0.36 0.30 0.33 125
accuracy 0.28 488
macro avg 0.29 0.28 0.28 488
weighted avg 0.29 0.28 0.28 488
print(classification_report(df['label'][len(y_pred_bool_4):], validation_y_pred_one_hot_4))
precision recall f1-score support
[0. 0. 0. 1.] 0.32 0.26 0.29 120
[0. 0. 1. 0.] 0.22 0.36 0.27 103
[0. 1. 0. 0.] 0.28 0.39 0.33 140
[1. 0. 0. 0.] 0.44 0.12 0.19 125
accuracy 0.28 488
macro avg 0.32 0.28 0.27 488
weighted avg 0.32 0.28 0.27 488
input2 = layers.Input(shape=(128, 128, 1))
output2 = layers.Flatten()(input2)
output2 = layers.Dense(1024, activation = "relu", kernel_regularizer = regularizers.l2(l2=0.0001))(output2)
output2 = layers.Dense(512, activation = "relu", kernel_regularizer = regularizers.l2(l2=0.0001))(output2)
output2 = layers.Dense(256, activation = "relu", kernel_regularizer = regularizers.l2(l2=0.0001))(output2)
output2 = layers.Dense(4, activation = "softmax", kernel_regularizer = regularizers.l2(l2=0.0001))(output2)
model_9 = models.Model(inputs = input2, outputs = output2)
model_9.compile(optimizer = optimizers.Adam(), loss = "categorical_crossentropy", metrics = ["accuracy"])
history_9 = model_9.fit(TrainData, validation_data = ValidationData, epochs=20)
Epoch 1/20 2/46 [>.............................] - ETA: 20s - loss: 11.7586 - accuracy: 0.3906
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 21s 444ms/step - loss: 4.7620 - accuracy: 0.3235 - val_loss: 1.6069 - val_accuracy: 0.4447 Epoch 2/20 46/46 [==============================] - 19s 410ms/step - loss: 1.5247 - accuracy: 0.4212 - val_loss: 1.4898 - val_accuracy: 0.3934 Epoch 3/20 46/46 [==============================] - 20s 425ms/step - loss: 1.4562 - accuracy: 0.4614 - val_loss: 1.3671 - val_accuracy: 0.4795 Epoch 4/20 46/46 [==============================] - 20s 432ms/step - loss: 1.3585 - accuracy: 0.4846 - val_loss: 1.3011 - val_accuracy: 0.5266 Epoch 5/20 46/46 [==============================] - 21s 453ms/step - loss: 1.2690 - accuracy: 0.5188 - val_loss: 1.3352 - val_accuracy: 0.4570 Epoch 6/20 46/46 [==============================] - 19s 423ms/step - loss: 1.2465 - accuracy: 0.5188 - val_loss: 1.3471 - val_accuracy: 0.4939 Epoch 7/20 46/46 [==============================] - 20s 435ms/step - loss: 1.2015 - accuracy: 0.5693 - val_loss: 1.3220 - val_accuracy: 0.4857 Epoch 8/20 46/46 [==============================] - 19s 410ms/step - loss: 1.1548 - accuracy: 0.5809 - val_loss: 1.2161 - val_accuracy: 0.5266 Epoch 9/20 46/46 [==============================] - 19s 411ms/step - loss: 1.1308 - accuracy: 0.5891 - val_loss: 1.2052 - val_accuracy: 0.5738 Epoch 10/20 46/46 [==============================] - 19s 401ms/step - loss: 1.0723 - accuracy: 0.6143 - val_loss: 1.1819 - val_accuracy: 0.5656 Epoch 11/20 46/46 [==============================] - 18s 389ms/step - loss: 1.0302 - accuracy: 0.6451 - val_loss: 1.1361 - val_accuracy: 0.6066 Epoch 12/20 46/46 [==============================] - 18s 396ms/step - loss: 0.9776 - accuracy: 0.6614 - val_loss: 1.0864 - val_accuracy: 0.6414 Epoch 13/20 46/46 [==============================] - 18s 393ms/step - loss: 0.9787 - accuracy: 0.6785 - val_loss: 1.2305 - val_accuracy: 0.5615 Epoch 14/20 46/46 [==============================] - 18s 394ms/step - loss: 0.9483 - accuracy: 0.6724 - val_loss: 1.0780 - val_accuracy: 0.6455 Epoch 15/20 46/46 [==============================] - 18s 393ms/step - loss: 0.8103 - accuracy: 0.7454 - val_loss: 1.3224 - val_accuracy: 0.4939 Epoch 16/20 46/46 [==============================] - 18s 392ms/step - loss: 0.8785 - accuracy: 0.7038 - val_loss: 1.4341 - val_accuracy: 0.4795 Epoch 17/20 46/46 [==============================] - 18s 393ms/step - loss: 0.9270 - accuracy: 0.6853 - val_loss: 1.2460 - val_accuracy: 0.5820 Epoch 18/20 46/46 [==============================] - 18s 392ms/step - loss: 0.7621 - accuracy: 0.7584 - val_loss: 1.3046 - val_accuracy: 0.5594 Epoch 19/20 46/46 [==============================] - 18s 396ms/step - loss: 0.6929 - accuracy: 0.7884 - val_loss: 1.2104 - val_accuracy: 0.6168 Epoch 20/20 46/46 [==============================] - 18s 392ms/step - loss: 0.7171 - accuracy: 0.7761 - val_loss: 1.3504 - val_accuracy: 0.5492
plt.plot(history_9.history['loss'])
plt.plot(history_9.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_9.history['accuracy'])
plt.plot(history_9.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_9 = model_9.predict(TrainData, verbose = 1)
y_pred_bool_9 = np.argmax(y_pred_9, axis = 1)
y_pred_one_hot_9 = [str(to_categorical(i, 4)) for i in y_pred_bool_9]
46/46 [==============================] - 12s 261ms/step
print(classification_report(df['label'][0:len(y_pred_bool_9)], y_pred_one_hot_9))
precision recall f1-score support
[0. 0. 0. 1.] 0.19 0.24 0.21 339
[0. 0. 1. 0.] 0.27 0.12 0.16 359
[0. 1. 0. 0.] 0.24 0.20 0.22 369
[1. 0. 0. 0.] 0.26 0.40 0.32 398
accuracy 0.24 1465
macro avg 0.24 0.24 0.23 1465
weighted avg 0.24 0.24 0.23 1465
validation_y_pred_9 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_9 = np.argmax(validation_y_pred_9, axis = 1)
validation_y_pred_one_hot_9 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_9]
7/16 [============>.................] - ETA: 1s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 202ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_9))
precision recall f1-score support
[0. 0. 0. 1.] 0.29 0.27 0.28 120
[0. 0. 1. 0.] 0.21 0.25 0.23 103
[0. 1. 0. 0.] 0.28 0.30 0.29 140
[1. 0. 0. 0.] 0.36 0.30 0.33 125
accuracy 0.28 488
macro avg 0.29 0.28 0.28 488
weighted avg 0.29 0.28 0.28 488
input3 = layers.Input(shape=(128, 128, 1))
output3 = layers.Flatten()(input3)
output3 = layers.Dense(1024, activation = "relu")(output3)
output3 = layers.Dropout(0.1)(output3)
output3 = layers.Dense(512, activation = "relu")(output3)
output3 = layers.Dropout(0.1)(output3)
output3 = layers.Dense(256, activation = "relu")(output3)
output3 = layers.Dropout(0.1)(output3)
output3 = layers.Dense(4, activation = "softmax")(output3)
model_10 = models.Model(inputs = input3, outputs = output3)
model_10.compile(optimizer = optimizers.Adam(), loss = "categorical_crossentropy", metrics = ["accuracy"])
model_10.summary()
Model: "model_10"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, 128, 128, 1)] 0
flatten_3 (Flatten) (None, 16384) 0
dense_12 (Dense) (None, 1024) 16778240
dropout (Dropout) (None, 1024) 0
dense_13 (Dense) (None, 512) 524800
dropout_1 (Dropout) (None, 512) 0
dense_14 (Dense) (None, 256) 131328
dropout_2 (Dropout) (None, 256) 0
dense_15 (Dense) (None, 4) 1028
=================================================================
Total params: 17,435,396
Trainable params: 17,435,396
Non-trainable params: 0
_________________________________________________________________
history_10 = model_10.fit(TrainData, validation_data = ValidationData, epochs=20)
Epoch 1/20
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
46/46 [==============================] - 19s 415ms/step - loss: 4.3172 - accuracy: 0.2737 - val_loss: 1.3527 - val_accuracy: 0.3033 Epoch 2/20 46/46 [==============================] - 17s 373ms/step - loss: 1.3756 - accuracy: 0.3495 - val_loss: 1.2650 - val_accuracy: 0.3750 Epoch 3/20 46/46 [==============================] - 18s 394ms/step - loss: 1.2717 - accuracy: 0.4068 - val_loss: 1.1985 - val_accuracy: 0.4385 Epoch 4/20 46/46 [==============================] - 17s 373ms/step - loss: 1.2282 - accuracy: 0.4396 - val_loss: 1.1849 - val_accuracy: 0.4119 Epoch 5/20 46/46 [==============================] - 17s 367ms/step - loss: 1.2224 - accuracy: 0.4259 - val_loss: 1.1463 - val_accuracy: 0.4590 Epoch 6/20 46/46 [==============================] - 17s 367ms/step - loss: 1.1655 - accuracy: 0.4505 - val_loss: 1.1590 - val_accuracy: 0.4385 Epoch 7/20 46/46 [==============================] - 17s 368ms/step - loss: 1.1425 - accuracy: 0.4696 - val_loss: 1.1137 - val_accuracy: 0.5143 Epoch 8/20 46/46 [==============================] - 17s 374ms/step - loss: 1.1437 - accuracy: 0.4539 - val_loss: 1.1175 - val_accuracy: 0.5184 Epoch 9/20 46/46 [==============================] - 17s 372ms/step - loss: 1.1385 - accuracy: 0.4546 - val_loss: 1.1342 - val_accuracy: 0.4857 Epoch 10/20 46/46 [==============================] - 17s 365ms/step - loss: 1.0850 - accuracy: 0.4942 - val_loss: 1.1178 - val_accuracy: 0.4467 Epoch 11/20 46/46 [==============================] - 17s 369ms/step - loss: 1.0830 - accuracy: 0.4840 - val_loss: 1.2331 - val_accuracy: 0.3934 Epoch 12/20 46/46 [==============================] - 17s 370ms/step - loss: 1.1049 - accuracy: 0.4771 - val_loss: 1.1603 - val_accuracy: 0.4303 Epoch 13/20 46/46 [==============================] - 17s 367ms/step - loss: 1.0935 - accuracy: 0.4744 - val_loss: 1.1355 - val_accuracy: 0.4754 Epoch 14/20 46/46 [==============================] - 17s 364ms/step - loss: 1.0501 - accuracy: 0.4833 - val_loss: 1.1810 - val_accuracy: 0.4652 Epoch 15/20 46/46 [==============================] - 17s 370ms/step - loss: 1.0044 - accuracy: 0.5331 - val_loss: 1.0707 - val_accuracy: 0.5451 Epoch 16/20 46/46 [==============================] - 17s 368ms/step - loss: 0.9986 - accuracy: 0.5413 - val_loss: 1.0867 - val_accuracy: 0.4959 Epoch 17/20 46/46 [==============================] - 17s 371ms/step - loss: 0.9765 - accuracy: 0.5543 - val_loss: 1.0535 - val_accuracy: 0.5410 Epoch 18/20 46/46 [==============================] - 17s 368ms/step - loss: 0.9388 - accuracy: 0.5481 - val_loss: 1.1320 - val_accuracy: 0.4590 Epoch 19/20 46/46 [==============================] - 18s 388ms/step - loss: 1.0832 - accuracy: 0.4887 - val_loss: 1.2074 - val_accuracy: 0.4508 Epoch 20/20 46/46 [==============================] - 17s 380ms/step - loss: 1.0246 - accuracy: 0.5290 - val_loss: 1.1325 - val_accuracy: 0.4754
plt.plot(history_10.history['loss'])
plt.plot(history_10.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history_10.history['accuracy'])
plt.plot(history_10.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
y_pred_10 = model_10.predict(TrainData, verbose = 1)
y_pred_bool_10 = np.argmax(y_pred_10, axis = 1)
y_pred_one_hot_10 = [str(to_categorical(i, 4)) for i in y_pred_bool_10]
46/46 [==============================] - 12s 271ms/step
print(classification_report(df['label'][0:len(y_pred_bool_10)], y_pred_one_hot_10))
precision recall f1-score support
[0. 0. 0. 1.] 0.24 0.12 0.16 339
[0. 0. 1. 0.] 0.26 0.05 0.09 359
[0. 1. 0. 0.] 0.27 0.63 0.37 369
[1. 0. 0. 0.] 0.27 0.23 0.25 398
accuracy 0.26 1465
macro avg 0.26 0.26 0.22 1465
weighted avg 0.26 0.26 0.22 1465
validation_y_pred_10 = model.predict(ValidationData, verbose = 1)
validation_y_pred_bool_10 = np.argmax(validation_y_pred_10, axis = 1)
validation_y_pred_one_hot_10 = [str(to_categorical(i, 4)) for i in validation_y_pred_bool_10]
7/16 [============>.................] - ETA: 2s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
16/16 [==============================] - 4s 224ms/step
print(classification_report(df['label'][len(y_pred_bool):], validation_y_pred_one_hot_10))
precision recall f1-score support
[0. 0. 0. 1.] 0.29 0.27 0.28 120
[0. 0. 1. 0.] 0.21 0.25 0.23 103
[0. 1. 0. 0.] 0.28 0.30 0.29 140
[1. 0. 0. 0.] 0.36 0.30 0.33 125
accuracy 0.28 488
macro avg 0.29 0.28 0.28 488
weighted avg 0.29 0.28 0.28 488
refrence : https://machinelearningmastery.com/dropout-regularization-deep-learning-models-keras/
test_path = "../dataset/test"
img_path = []
labels = []
for folder in os.listdir(test_path):
folder_path = os.path.join(test_path, folder)
for img in os.listdir(folder_path):
img_path.append(os.path.join(folder_path, img))
labels.append(folder)
df_2 = pd.DataFrame({
"image" : img_path,
"label" : labels
})
df_2.count()
image 839 label 839 dtype: int64
df_2['label'].value_counts().count()
4
df_2['label'].value_counts()
bald_eagle 225 racoon 219 elk 198 raven 197 Name: label, dtype: int64
plt.bar(
df_2['label'].value_counts().keys(),
df_2['label'].value_counts().values,
color='c',
width=.75
)
plt.xlabel("Class", fontweight ='bold')
plt.ylabel("Number", fontweight ='bold')
plt.title("Number of Each Class in Test Folder", fontweight ='bold')
plt.show()
for name in df_2['label'].value_counts().keys():
rand_num = random.randint(0, len(df_2))
while df_2['label'][rand_num] != name:
rand_num = random.randint(0, len(df_2))
img = load_img(df_2['image'][rand_num])
title = df_2['label'][rand_num]
plt.imshow(img)
plt.title(title)
plt.show()
for i in range(len(df_2)):
df_2['label'][i] = classes_labels[df_2['label'][i]]
df_2['label']
0 0
1 0
2 0
3 0
4 0
..
834 1
835 1
836 1
837 1
838 1
Name: label, Length: 839, dtype: object
one_hot = [str(to_categorical(i, df_2['label'].value_counts().count())) for i in df_2['label']]
df_2['label'] = one_hot
df_2['label']
0 [1. 0. 0. 0.]
1 [1. 0. 0. 0.]
2 [1. 0. 0. 0.]
3 [1. 0. 0. 0.]
4 [1. 0. 0. 0.]
...
834 [0. 1. 0. 0.]
835 [0. 1. 0. 0.]
836 [0. 1. 0. 0.]
837 [0. 1. 0. 0.]
838 [0. 1. 0. 0.]
Name: label, Length: 839, dtype: object
dataGenerator = preprocessing.image.ImageDataGenerator(
rescale = 1 / 255.0,
)
TestData = dataGenerator.flow_from_dataframe(
dataframe = df_2,
x_col = 'image',
y_col = 'label',
target_size = (128, 128),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
subset = 'training',
shuffle = False,
validate_filenames = False
)
Found 839 non-validated image filenames belonging to 4 classes.
bool_to_name = {v: k for k, v in classes_labels.items()}
y_pred_test = model_10.predict(TestData, verbose = 1)
y_pred_bool_test = np.argmax(y_pred_test, axis = 1)
y_pred_one_hot_test = [str(to_categorical(i, 4)) for i in y_pred_bool_test]
y_pred_name_test = [bool_to_name[i] for i in y_pred_bool_test]
2/27 [=>............................] - ETA: 5s
/usr/lib/python3/dist-packages/PIL/Image.py:931: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images warnings.warn(
27/27 [==============================] - 6s 242ms/step
print(classification_report(df_2['label'], y_pred_one_hot_test))
precision recall f1-score support
[0. 0. 0. 1.] 0.15 0.09 0.11 197
[0. 0. 1. 0.] 0.02 0.01 0.01 198
[0. 1. 0. 0.] 0.29 0.63 0.39 219
[1. 0. 0. 0.] 0.25 0.21 0.23 225
accuracy 0.24 839
macro avg 0.18 0.23 0.19 839
weighted avg 0.18 0.24 0.19 839
for i in range(0, 10):
rand_num = random.randint(0, len(df_2))
while df_2['label'][rand_num] != y_pred_one_hot_test[rand_num]:
rand_num = random.randint(0, len(df_2))
img = load_img(df_2['image'][rand_num])
title = "Predicted : " + y_pred_name_test[rand_num]
plt.imshow(img)
plt.title(title)
plt.show()
for i in range(0, 10):
rand_num = random.randint(0, len(df_2))
while df_2['label'][rand_num] == y_pred_one_hot_test[rand_num]:
rand_num = random.randint(0, len(df_2))
img = load_img(df_2['image'][rand_num])
title = "Predicted : " + y_pred_name_test[rand_num]
plt.imshow(img)
plt.title(title)
plt.show()